bitkeeper revision 1.1236.33.2 (4236b517THiLxPjnIZVybs7stl7QFQ)
author mafetter@fleming.research <mafetter@fleming.research>
Tue, 15 Mar 2005 10:12:39 +0000 (10:12 +0000)
committer mafetter@fleming.research <mafetter@fleming.research>
Tue, 15 Mar 2005 10:12:39 +0000 (10:12 +0000)
Make validate_(pte|pde)_changes a little smarter.
Avoid some unnecessary calls to __shadow_status.
Added an early out for __shadow_status.

Signed-off-by: michael.fetterman@cl.cam.ac.uk
xen/arch/x86/audit.c
xen/arch/x86/shadow.c
xen/include/asm-x86/mm.h
xen/include/asm-x86/shadow.h
xen/include/xen/perfc_defn.h

index 1c5b89fa2f066a6db3a03bb9ef31570b8817056b..7fcc8c43773cafb5ef45454d15c6ecc9731434ed 100644 (file)
 #include <xen/kernel.h>
 #include <xen/lib.h>
 #include <xen/mm.h>
-//#include <xen/sched.h>
-//#include <xen/errno.h>
 #include <xen/perfc.h>
-//#include <xen/irq.h>
-//#include <xen/softirq.h>
 #include <asm/shadow.h>
 #include <asm/page.h>
 #include <asm/flushtlb.h>
-//#include <asm/io.h>
-//#include <asm/uaccess.h>
-//#include <asm/domain_page.h>
-//#include <asm/ldt.h>
 
 // XXX SMP bug -- these should not be statics...
 //
 static int ttot=0, ctot=0, io_mappings=0, lowmem_mappings=0;
 static int l1, l2, oos_count, page_count;
 
-#define FILE_AND_LINE 1
+#define FILE_AND_LINE 0
 
 #if FILE_AND_LINE
 #define adjust(_p, _a) _adjust((_p), (_a), __FILE__, __LINE__)
@@ -73,7 +65,7 @@ int audit_adjust_pgtables(struct domain *d, int dir, int noisy)
             if ( page_get_owner(page) == NULL )
             {
                 APRINTK("adjust(mfn=%p, dir=%d, adjtype=%d) owner=NULL",
-                        page_to_pfn(page), dir, adjtype, file, line);
+                        page_to_pfn(page), dir, adjtype);
                 errors++;
             }
 
index ea690b94ffb8ddceaed57310c10ee32062996633..e087574b86a8dc4aa6c5cc64abac298f0ee74bf0 100644 (file)
@@ -60,7 +60,7 @@ shadow_promote(struct domain *d, unsigned long gpfn, unsigned long gmfn,
         __shadow_sync_mfn(d, gmfn);
     }
 
-    if ( unlikely(mfn_is_page_table(gmfn)) )
+    if ( unlikely(page_is_page_table(page)) )
     {
         min_type = shadow_max_pgtable_type(d, gpfn) + PGT_l1_shadow;
         max_type = new_type;
@@ -99,7 +99,7 @@ shadow_promote(struct domain *d, unsigned long gpfn, unsigned long gmfn,
     if ( get_page_type(page, PGT_base_page_table) )
     {
         put_page_type(page);
-        set_bit(_PGC_page_table, &frame_table[gmfn].count_info);
+        set_bit(_PGC_page_table, &page->count_info);
     }
     else
     {
index 7cb895e9fc13cb832010d4a5a1343c272fb0e3ad..4ae9f09f08586ddab15613ed98b74573bd6050ba 100644 (file)
@@ -129,8 +129,6 @@ static inline u32 pickle_domptr(struct domain *domain)
 #define page_get_owner(_p)    (unpickle_domptr((_p)->u.inuse._domain))
 #define page_set_owner(_p,_d) ((_p)->u.inuse._domain = pickle_domptr(_d))
 
-#define page_out_of_sync(_p)  ((_p)->count_info & PGC_out_of_sync)
-
 #define SHARE_PFN_WITH_DOMAIN(_pfn, _dom)                                   \
     do {                                                                    \
         page_set_owner((_pfn), (_dom));                                     \
@@ -235,22 +233,6 @@ static inline int get_page_and_type(struct pfn_info *page,
     return rc;
 }
 
-static inline int mfn_is_page_table(unsigned long mfn)
-{
-    if ( !pfn_is_ram(mfn) )
-        return 0;
-
-    return frame_table[mfn].count_info & PGC_page_table;
-}
-
-static inline int page_is_page_table(struct pfn_info *page)
-{
-    if ( !pfn_is_ram(page_to_pfn(page)) )
-        return 0;
-
-    return page->count_info & PGC_page_table;
-}
-
 #define ASSERT_PAGE_IS_TYPE(_p, _t)                            \
     ASSERT(((_p)->u.inuse.type_info & PGT_type_mask) == (_t)); \
     ASSERT(((_p)->u.inuse.type_info & PGT_count_mask) != 0)
index 70abf82e8a89f1c1e256bb4e08c98ce60eb34481..6ec919c6592fd91469ba704d56b6ae3960bd94d9 100644 (file)
@@ -68,6 +68,33 @@ static inline unsigned long __shadow_status(
 
 extern void vmx_shadow_clear_state(struct domain *);
 
+static inline int page_is_page_table(struct pfn_info *page)
+{
+    return page->count_info & PGC_page_table;
+}
+
+static inline int mfn_is_page_table(unsigned long mfn)
+{
+    if ( !pfn_is_ram(mfn) )
+        return 0;
+
+    return frame_table[mfn].count_info & PGC_page_table;
+}
+
+static inline int page_out_of_sync(struct pfn_info *page)
+{
+    return page->count_info & PGC_out_of_sync;
+}
+
+static inline int mfn_out_of_sync(unsigned long mfn)
+{
+    if ( !pfn_is_ram(mfn) )
+        return 0;
+
+    return frame_table[mfn].count_info & PGC_out_of_sync;
+}
+
+
 /************************************************************************/
 
 static void inline
@@ -565,9 +592,10 @@ static inline void l2pde_general(
 static inline void l2pde_propagate_from_guest(
     struct domain *d, unsigned long *gpde_p, unsigned long *spde_p)
 {
-    unsigned long gpde = *gpde_p, sl1mfn;
+    unsigned long gpde = *gpde_p, sl1mfn = 0;
 
-    sl1mfn =  __shadow_status(d, gpde >> PAGE_SHIFT, PGT_l1_shadow);
+    if ( gpde & _PAGE_PRESENT )
+        sl1mfn =  __shadow_status(d, gpde >> PAGE_SHIFT, PGT_l1_shadow);
     l2pde_general(d, gpde_p, spde_p, sl1mfn);
 }
     
@@ -583,7 +611,7 @@ validate_pte_change(
 {
     unsigned long old_spte, new_spte;
 
-    perfc_incrc(validate_pte_change);
+    perfc_incrc(validate_pte_calls);
 
 #if 0
     FSH_LOG("validate_pte(old=%p new=%p)\n", old_pte, new_pte);
@@ -595,8 +623,11 @@ validate_pte_change(
 
     // only do the ref counting if something important changed.
     //
-    if ( (old_spte ^ new_spte) & (PAGE_MASK | _PAGE_RW | _PAGE_PRESENT) )
+    if ( ((old_spte | new_spte) & _PAGE_PRESENT ) &&
+         ((old_spte ^ new_spte) & (PAGE_MASK | _PAGE_RW | _PAGE_PRESENT)) )
     {
+        perfc_incrc(validate_pte_changes);
+
         if ( new_spte & _PAGE_PRESENT )
             shadow_get_page_from_l1e(mk_l1_pgentry(new_spte), d);
         if ( old_spte & _PAGE_PRESENT )
@@ -618,15 +649,18 @@ validate_pde_change(
     unsigned long old_spde = *shadow_pde_p;
     unsigned long new_spde;
 
-    perfc_incrc(validate_pde_change);
+    perfc_incrc(validate_pde_calls);
 
     l2pde_propagate_from_guest(d, &new_pde, shadow_pde_p);
     new_spde = *shadow_pde_p;
 
     // only do the ref counting if something important changed.
     //
-    if ( (old_spde ^ new_spde) & (PAGE_MASK | _PAGE_PRESENT) )
+    if ( ((old_spde | new_spde) & _PAGE_PRESENT) &&
+         ((old_spde ^ new_spde) & (PAGE_MASK | _PAGE_PRESENT)) )
     {
+        perfc_incrc(validate_pde_changes);
+
         if ( new_spde & _PAGE_PRESENT )
             get_shadow_ref(new_spde >> PAGE_SHIFT);
         if ( old_spde & _PAGE_PRESENT )
@@ -720,16 +754,12 @@ static inline struct shadow_status *hash_bucket(
  *      It returns the shadow's mfn, or zero if it doesn't exist.
  */
 
-static inline unsigned long __shadow_status(
+static inline unsigned long ___shadow_status(
     struct domain *d, unsigned long gpfn, unsigned long stype)
 {
     struct shadow_status *p, *x, *head;
     unsigned long key = gpfn | stype;
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
-    ASSERT(gpfn == (gpfn & PGT_mfn_mask));
-    ASSERT(stype && !(stype & ~PGT_type_mask));
-
     perfc_incrc(shadow_status_calls);
 
     x = head = hash_bucket(d, gpfn);
@@ -779,6 +809,27 @@ static inline unsigned long __shadow_status(
     return 0;
 }
 
+static inline unsigned long __shadow_status(
+    struct domain *d, unsigned long gpfn, unsigned long stype)
+{
+    unsigned long gmfn = __gpfn_to_mfn(d, gpfn);
+
+    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(gpfn == (gpfn & PGT_mfn_mask));
+    ASSERT(stype && !(stype & ~PGT_type_mask));
+
+    if ( gmfn && ((stype != PGT_snapshot)
+                  ? !mfn_is_page_table(gmfn)
+                  : !mfn_out_of_sync(gmfn)) )
+    {
+        perfc_incrc(shadow_status_shortcut);
+        ASSERT(___shadow_status(d, gpfn, stype) == 0);
+        return 0;
+    }
+
+    return ___shadow_status(d, gmfn, stype);
+}
+
 /*
  * Not clear if pull-to-front is worth while for this or not,
  * as it generally needs to scan the entire bucket anyway.
index 5e38e38924fb7d11f5773ed984b4e02a081a0021..4402020a6da9b224a3f2a190ddd06c2a45340443 100644 (file)
@@ -38,7 +38,8 @@ PERFSTATUS( shadow_l1_pages, "current # shadow L1 pages" )
 PERFSTATUS( hl2_table_pages, "current # hl2 pages" )
 PERFSTATUS( snapshot_pages,  "current # fshadow snapshot pages" )
 
-PERFCOUNTER_CPU(shadow_status_calls,    "calls to __shadow_status" )
+PERFCOUNTER_CPU(shadow_status_shortcut, "fastpath miss on shadow cache")
+PERFCOUNTER_CPU(shadow_status_calls,    "calls to ___shadow_status" )
 PERFCOUNTER_CPU(shadow_status_miss,     "missed shadow cache" )
 PERFCOUNTER_CPU(shadow_status_hit_head, "hits on head of bucket" )
 PERFCOUNTER_CPU(check_pagetable,        "calls to check_pagetable" )
@@ -59,5 +60,7 @@ PERFCOUNTER_CPU(shadow_fault_bail_pde_not_present, "sf bailed due to pde not pre
 PERFCOUNTER_CPU(shadow_fault_bail_pte_not_present, "sf bailed due to pte not present")
 PERFCOUNTER_CPU(shadow_fault_bail_ro_mapping,      "sf bailed due to a ro mapping")
 PERFCOUNTER_CPU(shadow_fault_fixed,                "sf fixed the pgfault")
-PERFCOUNTER_CPU(validate_pte_change,               "calls to validate_pte_change")
-PERFCOUNTER_CPU(validate_pde_change,               "calls to validate_pde_change")
+PERFCOUNTER_CPU(validate_pte_calls,                "calls to validate_pte_change")
+PERFCOUNTER_CPU(validate_pte_changes,              "validate_pte makes changes")
+PERFCOUNTER_CPU(validate_pde_calls,                "calls to validate_pde_change")
+PERFCOUNTER_CPU(validate_pde_changes,              "validate_pde makes changes")